chatterer 0.1.12__py3-none-any.whl → 0.1.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
chatterer/__init__.py CHANGED
@@ -1,10 +1,14 @@
-from .language_model import Chatterer, interactive_shell
+from .interactive import interactive_shell
+from .language_model import Chatterer
 from .messages import (
     AIMessage,
     BaseMessage,
+    BaseMessageChunk,
     FunctionMessage,
     HumanMessage,
+    LanguageModelInput,
     SystemMessage,
+    UsageMetadata,
 )
 from .strategies import (
     AoTPipeline,
@@ -13,18 +17,33 @@ from .strategies import (
     BaseStrategy,
 )
 from .tools import (
+    CodeSnippets,
+    MarkdownLink,
+    PdfToMarkdown,
+    PlayWrightBot,
+    PlaywrightLaunchOptions,
+    PlaywrightOptions,
+    PlaywrightPersistencyOptions,
+    UpstageDocumentParseParser,
+    acaption_markdown_images,
     anything_to_markdown,
+    caption_markdown_images,
     citation_chunker,
+    extract_text_from_pdf,
     get_default_html_to_markdown_options,
+    get_default_playwright_launch_options,
+    get_youtube_video_details,
     get_youtube_video_subtitle,
     html_to_markdown,
-    init_webpage_to_markdown,
+    open_pdf,
     pdf_to_text,
     pyscripts_to_snippets,
-    get_youtube_video_details,
+    render_pdf_as_image,
 )
 from .utils import (
+    ArgumentSpec,
     Base64Image,
+    BaseArguments,
     CodeExecutionResult,
     FunctionSignature,
     get_default_repl_tool,
@@ -49,7 +68,6 @@ __all__ = [
     "AIMessage",
     "FunctionMessage",
     "Base64Image",
-    "init_webpage_to_markdown",
     "FunctionSignature",
     "CodeExecutionResult",
     "get_default_repl_tool",
@@ -57,4 +75,23 @@ __all__ = [
     "get_youtube_video_subtitle",
     "get_youtube_video_details",
     "interactive_shell",
+    "UpstageDocumentParseParser",
+    "BaseMessageChunk",
+    "CodeSnippets",
+    "LanguageModelInput",
+    "UsageMetadata",
+    "PlayWrightBot",
+    "PlaywrightLaunchOptions",
+    "PlaywrightOptions",
+    "PlaywrightPersistencyOptions",
+    "get_default_playwright_launch_options",
+    "acaption_markdown_images",
+    "caption_markdown_images",
+    "MarkdownLink",
+    "PdfToMarkdown",
+    "extract_text_from_pdf",
+    "open_pdf",
+    "render_pdf_as_image",
+    "ArgumentSpec",
+    "BaseArguments",
 ]
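For orientation, the sketch below exercises a few of the names that 0.1.14 newly re-exports from the package root. It is not taken from the project's documentation: the import paths and `__all__` entries come from the diff above, the `generate_stream` call shape mirrors its use in the new interactive module further down, and OpenAI credentials are assumed to be configured in the environment.

```python
# Minimal usage sketch (assumptions noted above), not an official example.
from langchain_core.messages import HumanMessage

from chatterer import Chatterer, interactive_shell

llm = Chatterer.openai()  # default construction, as used by interactive_shell below

# Stream a single reply; generate_stream yields text chunks (see respond() in the new module).
for chunk in llm.generate_stream(messages=[HumanMessage(content="Summarize what chatterer does.")]):
    print(chunk, end="", flush=True)
print()

# interactive_shell now lives in its own module but is still exported at the top level.
interactive_shell(chatterer=llm)
```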
@@ -0,0 +1,21 @@
+from .io import (
+    BytesReadable,
+    BytesWritable,
+    FileDescriptorOrPath,
+    PathOrReadable,
+    Readable,
+    StringReadable,
+    StringWritable,
+    Writable,
+)
+
+__all__ = [
+    "BytesReadable",
+    "BytesWritable",
+    "FileDescriptorOrPath",
+    "PathOrReadable",
+    "Readable",
+    "StringReadable",
+    "StringWritable",
+    "Writable",
+]
@@ -0,0 +1,19 @@
+import os
+from io import BufferedReader, BufferedWriter, BytesIO, StringIO, TextIOWrapper
+from typing import TypeAlias
+
+# Type aliases for callback functions and file descriptors
+FileDescriptorOrPath: TypeAlias = int | str | bytes | os.PathLike[str] | os.PathLike[bytes]
+
+# Type aliases for different types of IO objects
+BytesReadable: TypeAlias = BytesIO | BufferedReader
+BytesWritable: TypeAlias = BytesIO | BufferedWriter
+StringReadable: TypeAlias = StringIO | TextIOWrapper
+StringWritable: TypeAlias = StringIO | TextIOWrapper
+
+# Combined type aliases for readable and writable objects
+Readable: TypeAlias = BytesReadable | StringReadable
+Writable: TypeAlias = BytesWritable | StringWritable
+
+# Type alias for path or readable object
+PathOrReadable: TypeAlias = FileDescriptorOrPath | Readable
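Because these aliases are ordinary unions, downstream code can accept "a path, a file descriptor, or an already-open file object" with a single annotation and branch with `isinstance`. The helper below is purely illustrative and not part of chatterer; only the alias definitions are copied from the file above.

```python
# Hypothetical consumer of the aliases above; read_all() is invented for illustration.
import os
from io import BufferedReader, BytesIO, StringIO, TextIOWrapper
from typing import TypeAlias

FileDescriptorOrPath: TypeAlias = int | str | bytes | os.PathLike[str] | os.PathLike[bytes]
BytesReadable: TypeAlias = BytesIO | BufferedReader
StringReadable: TypeAlias = StringIO | TextIOWrapper
Readable: TypeAlias = BytesReadable | StringReadable
PathOrReadable: TypeAlias = FileDescriptorOrPath | Readable


def read_all(source: PathOrReadable) -> str:
    """Accept a path, file descriptor, or open file object and return its text."""
    if isinstance(source, (BytesIO, BufferedReader)):  # BytesReadable
        return source.read().decode("utf-8")
    if isinstance(source, (StringIO, TextIOWrapper)):  # StringReadable
        return source.read()
    with open(source, encoding="utf-8") as f:  # FileDescriptorOrPath
        return f.read()


print(read_all(StringIO("hello")))  # -> hello
```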
@@ -0,0 +1,353 @@
+import sys
+from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional
+
+from langchain_core.messages import (
+    AIMessage,
+    BaseMessage,
+    HumanMessage,
+    SystemMessage,
+)
+from langchain_core.runnables import RunnableConfig
+from pydantic import BaseModel, Field
+
+from .language_model import Chatterer
+from .utils.code_agent import (
+    DEFAULT_CODE_GENERATION_PROMPT,
+    DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
+    DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
+    CodeExecutionResult,
+    FunctionSignature,
+    augment_prompt_for_toolcall,
+    get_default_repl_tool,
+)
+
+if TYPE_CHECKING:
+    # Import only for type hinting to avoid circular dependencies if necessary
+    from langchain_experimental.tools.python.tool import PythonAstREPLTool
+
+
+# --- Pydantic Models ---
+
+
+class ThinkBeforeSpeak(BaseModel):
+    """
+    Analyze the user's request and formulate an initial plan.
+    This involves understanding the core task and breaking it down into logical steps.
+    """
+
+    task: str = Field(description="A concise summary of the user's overall goal or question.")
+    plans: list[str] = Field(
+        description="A sequence of actionable steps required to address the user's task. "
+        "Each step should be clear and logical. Indicate if a step likely requires code execution."
+    )
+
+
+class IsToolCallNeeded(BaseModel):
+    """
+    Determine if executing Python code is the necessary *next* action.
+    Carefully review the most recent messages, especially the last code execution output and review (if any).
+    """
+
+    is_tool_call_needed: bool = Field(
+        description="Set to True ONLY if the *next logical step* requires executing Python code AND the previous step (if it involved code) did not already attempt this exact action and fail or produce unusable results. If the last code execution failed to achieve its goal (e.g., wrong data, error), set to False unless you plan to execute *different* code to overcome the previous issue. Set to False if the next step is reasoning, asking questions, or formulating a response based on existing information (including failed tool attempts)."
+    )
+
+
+class ReviewOnToolcall(BaseModel):
+    """
+    Evaluate the outcome of the Python code execution and decide the subsequent action.
+    Critically assess if the execution achieved the intended goal and if the output is usable.
+    """
+
+    review_on_code_execution: str = Field(
+        description="A critical analysis of the code execution result. Did it succeed technically? Did it produce the *expected and usable* output according to the plan? Explicitly mention any errors, unexpected values (like incorrect dates), or unusable results."
+    )
+    next_action: str = Field(
+        description="Describe the *immediate next logical action* based on the review. **If the execution failed or yielded unusable/unexpected results, DO NOT suggest repeating the exact same code execution.** Instead, propose a different action, such as: 'Try a different code approach to get the time', 'Inform the user about the environmental issue with the date', 'Ask the user to verify the result', or 'Abandon this approach and try something else'. If the execution was successful and useful, describe the next step in the plan (e.g., 'Use the retrieved time to formulate the answer')."
+    )
+    is_task_completed: bool = Field(
+        description="Set to True ONLY IF the *overall user task* is now fully addressed OR if the *only remaining action* based on the review is to generate the final response/answer directly to the user (this includes informing the user about an unresolvable issue found during execution). Set to False if further *productive* intermediate steps (like trying different code, processing data further, asking for input) are needed before the final response."
+    )
+
+
+class Think(BaseModel):
+    """
+    Engage in reasoning when code execution is not the immediate next step.
+    This could involve synthesizing information, preparing the final answer, or identifying missing information.
+    """
+
+    my_thinking: str = Field(
+        description="Explain your reasoning process. Why is code execution not needed now? "
+        "What information are you using from the context? How are you planning to formulate the response or proceed?"
+    )
+    next_action: str = Field(
+        description="Describe the *immediate next action* resulting from this thinking process. "
+        "Examples: 'Formulate the final answer to the user', 'Ask the user a clarifying question', "
+        "'Summarize the findings so far'."
+    )
+    # --- MODIFIED DESCRIPTION ---
+    is_task_completed: bool = Field(
+        description="Set this to True IF AND ONLY IF the 'next_action' you just described involves generating the final response, explanation, or answer directly for the user, based on the reasoning in 'my_thinking'. If the 'next_action' involves asking the user a question, planning *further* internal steps (beyond formulating the immediate response), or indicates the task cannot be completed yet, set this to False. **If the plan is simply to tell the user the answer now, set this to True.**"
+    )
+    # --- END OF MODIFICATION ---
+
+
+# --- Interactive Shell Function ---
+
+
+def interactive_shell(
+    chatterer: Chatterer = Chatterer.openai(),  # Assuming Chatterer.openai() is correct
+    system_instruction: BaseMessage | Iterable[BaseMessage] = ([
+        SystemMessage(
+            "You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
+        ),
+    ]),
+    repl_tool: Optional["PythonAstREPLTool"] = None,
+    prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
+    additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
+    function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
+    function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
+    config: Optional[RunnableConfig] = None,
+    stop: Optional[list[str]] = None,
+    **kwargs: Any,
+) -> None:
+    try:
+        from rich.console import Console
+        from rich.panel import Panel
+        from rich.prompt import Prompt
+
+        console = Console()
+        # Style settings
+        AI_STYLE = "bold bright_blue"
+        EXECUTED_CODE_STYLE = "bold bright_yellow"
+        OUTPUT_STYLE = "bold bright_cyan"
+        THINKING_STYLE = "dim white"
+    except ImportError:
+        raise ImportError("Rich library not found. Please install it: pip install rich")
+
+    def respond(messages: list[BaseMessage]) -> str:
+        response = ""
+        if "rich" not in sys.modules:
+            for chunk in chatterer.generate_stream(messages=messages):
+                print(chunk, end="", flush=True)
+                response += chunk
+            print()
+        else:
+            with console.status("[bold yellow]AI is thinking..."):
+                response_panel = Panel("", title="AI Response", style=AI_STYLE, border_style="blue")
+                current_content = ""
+                for chunk in chatterer.generate_stream(messages=messages):
+                    current_content += chunk
+                    # Update renderable (might not display smoothly without Live)
+                    response_panel.renderable = current_content
+                response = current_content
+            console.print(Panel(response, title="AI Response", style=AI_STYLE))
+        return response.strip()
+
+    def complete_task(think_before_speak: ThinkBeforeSpeak) -> None:
+        task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
+            think_before_speak.plans
+        )
+        console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
+        session_messages: list[BaseMessage] = [
+            AIMessage(
+                content=f"Okay, I understand the task. Here's my plan:\n"
+                f"- Task Summary: {think_before_speak.task}\n"
+                f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
+            )
+        ]
+
+        while True:
+            current_context = context + session_messages
+            is_tool_call_needed: IsToolCallNeeded = chatterer.generate_pydantic(
+                response_model=IsToolCallNeeded,
+                messages=augment_prompt_for_toolcall(
+                    function_signatures=function_signatures,
+                    messages=current_context,
+                    prompt_for_code_invoke=prompt_for_code_invoke,
+                    function_reference_prefix=function_reference_prefix,
+                    function_reference_seperator=function_reference_seperator,
+                ),
+                config=config,
+                stop=stop,
+                **kwargs,
+            )
+
+            if is_tool_call_needed.is_tool_call_needed:
+                # --- Code Execution Path ---
+                code_execution: CodeExecutionResult = chatterer.invoke_code_execution(
+                    messages=current_context,
+                    repl_tool=repl_tool,
+                    prompt_for_code_invoke=prompt_for_code_invoke,
+                    function_signatures=function_signatures,
+                    function_reference_prefix=function_reference_prefix,
+                    function_reference_seperator=function_reference_seperator,
+                    config=config,
+                    stop=stop,
+                    **kwargs,
+                )
+                code_block_display = (
+                    f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
+                    f"[bold]Output:[/bold]\n{code_execution.output}"
+                )
+                console.print(
+                    Panel(code_block_display, title="Code Execution", style=EXECUTED_CODE_STYLE, border_style="yellow")
+                )
+                tool_call_message = AIMessage(
+                    content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
+                )
+                session_messages.append(tool_call_message)
+
+                # --- Review Code Execution ---
+                current_context_after_exec = context + session_messages
+                decision = chatterer.generate_pydantic(
+                    response_model=ReviewOnToolcall,
+                    messages=augment_prompt_for_toolcall(
+                        function_signatures=function_signatures,
+                        messages=current_context_after_exec,
+                        prompt_for_code_invoke=prompt_for_code_invoke,
+                        function_reference_prefix=function_reference_prefix,
+                        function_reference_seperator=function_reference_seperator,
+                    ),
+                    config=config,
+                    stop=stop,
+                    **kwargs,
+                )
+                review_text = (
+                    f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
+                    f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
+                )
+                console.print(Panel(review_text, title="Execution Review", style=OUTPUT_STYLE, border_style="cyan"))
+                review_message = AIMessage(
+                    content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
+                    f"**Next Action:** {decision.next_action.strip()}"
+                )
+                session_messages.append(review_message)
+
+                # --- Check Completion after Review ---
+                if decision.is_task_completed:
+                    console.print(
+                        Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
+                    )
+                    break  # Exit loop
+            else:
+                # --- Thinking Path (No Code Needed) ---
+                current_context_before_think = context + session_messages
+                decision = chatterer.generate_pydantic(
+                    response_model=Think,  # Uses updated description
+                    messages=augment_prompt_for_toolcall(
+                        function_signatures=function_signatures,
+                        messages=current_context_before_think,
+                        prompt_for_code_invoke=prompt_for_code_invoke,
+                        function_reference_prefix=function_reference_prefix,
+                        function_reference_seperator=function_reference_seperator,
+                    ),
+                    config=config,
+                    stop=stop,
+                    **kwargs,
+                )
+                thinking_text = (
+                    f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
+                    f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
+                )
+                console.print(
+                    Panel(
+                        thinking_text, title="AI Thought Process (No Code)", style=THINKING_STYLE, border_style="white"
+                    )
+                )
+                thinking_message = AIMessage(
+                    content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
+                    f"**Next Action:** {decision.next_action.strip()}"
+                )
+                session_messages.append(thinking_message)
+
+                # --- Check Completion after Thinking ---
+                # This check now relies on the LLM correctly interpreting the updated
+                # description for Think.is_task_completed
+                if decision.is_task_completed:
+                    console.print(
+                        Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
+                    )
+                    break  # Exit loop
+
+        # --- End of Loop ---
+        # Generate and display the final response based on the *entire* session history
+        final_response_messages = context + session_messages
+        response: str = respond(final_response_messages)
+        # Add the final AI response to the main context
+        context.append(AIMessage(content=response))
+
+    # --- Shell Initialization and Main Loop ---
+    if repl_tool is None:
+        repl_tool = get_default_repl_tool()
+
+    if additional_callables:
+        if callable(additional_callables):
+            additional_callables = [additional_callables]
+
+        function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
+    else:
+        function_signatures: list[FunctionSignature] = []
+
+    context: list[BaseMessage] = []
+    if system_instruction:
+        if isinstance(system_instruction, BaseMessage):
+            context.append(system_instruction)
+        elif isinstance(system_instruction, str):
+            context.append(SystemMessage(content=system_instruction))
+        else:
+            context.extend(list(system_instruction))
+
+    console.print(
+        Panel(
+            "Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
+            title="Welcome",
+            style=AI_STYLE,
+            border_style="blue",
+        )
+    )
+
+    while True:
+        try:
+            user_input = Prompt.ask("[bold green]You[/bold green]")
+        except EOFError:
+            user_input = "exit"
+
+        if user_input.strip().lower() in ["quit", "exit"]:
+            console.print(Panel("Goodbye!", title="Exit", style=AI_STYLE, border_style="blue"))
+            break
+
+        context.append(HumanMessage(content=user_input.strip()))
+
+        try:
+            # Initial planning step
+            initial_plan_decision = chatterer.generate_pydantic(
+                response_model=ThinkBeforeSpeak,
+                messages=augment_prompt_for_toolcall(
+                    function_signatures=function_signatures,
+                    messages=context,
+                    prompt_for_code_invoke=prompt_for_code_invoke,
+                    function_reference_prefix=function_reference_prefix,
+                    function_reference_seperator=function_reference_seperator,
+                ),
+                config=config,
+                stop=stop,
+                **kwargs,
+            )
+            # Execute the task completion loop
+            complete_task(initial_plan_decision)
+
+        except Exception as e:
+            import traceback
+
+            console.print(
+                Panel(
+                    f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
+                    title="Error",
+                    border_style="red",
+                )
+            )
+
+
+if __name__ == "__main__":
+    interactive_shell()
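For context on how the new module is meant to be driven, here is a small launch sketch based on the `interactive_shell` signature above. The argument names, the acceptance of a single callable, and the handling of a single `SystemMessage` all come from the code in this diff; `lookup_weather` itself is a made-up helper, and an OpenAI key is assumed to be available in the environment.

```python
# Launch sketch only; lookup_weather is not part of chatterer.
from langchain_core.messages import SystemMessage

from chatterer import Chatterer, interactive_shell


def lookup_weather(city: str) -> str:
    """Toy function exposed to the agent's Python REPL via additional_callables."""
    return f"The weather in {city} is sunny."  # stub data for the sketch


if __name__ == "__main__":
    interactive_shell(
        chatterer=Chatterer.openai(),
        system_instruction=SystemMessage(
            "You are an AI assistant that may execute Python code to help the user."
        ),
        additional_callables=lookup_weather,  # a single callable is wrapped into a list internally
    )
```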